#importing some useful packages
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import inspect
from moviepy.editor import VideoFileClip
%matplotlib inline
Demo images and a helper function that I implemented to show (and test) the various steps of the image-processing pipeline. Note that this is achieved without cluttering or slowing down the output of the video-processing loop.
# Load the demo images used to visualize intermediate processing steps.
demo_images = []
demo_names = []
for file in os.listdir("test_images/"):
    demo_images.append(mpimg.imread('test_images/' + file))
    demo_names.append(os.path.splitext(file)[0])
num_demo_images = len(demo_images)

# The implementation shows partial image processing results when demo_mode is True,
# it will be set to False before the final processing loop in the end.
demo_mode = True

# Subplot grid layout for the demo figures. Bug fix: use ceiling integer
# division so fig_rows is an int (matplotlib's subplot() rejects floats) and
# every image still gets a cell when num_demo_images is not a multiple of
# fig_cols.
fig_cols = 2
fig_rows = (num_demo_images + fig_cols - 1) // fig_cols
fig_dict = {}
def demo_show(image, title='', cmap=None, lines=None):
    """
    Display `image` as one subplot of a figure dedicated to the calling line.

    Each call site gets its own figure (keyed by caller filename + line
    number), and consecutive calls from the same source line fill consecutive
    subplots, one per demo image. Does nothing unless the global `demo_mode`
    is True.

    image -- image to display (RGB or single-channel)
    title -- figure title, only used when the figure is first created
    cmap  -- matplotlib colormap, e.g. 'gray' for single-channel images
    lines -- optional segments (cv2.HoughLinesP format) drawn in red on a copy
    """
    if demo_mode:
        # Use callstack to get a unique dictionary key for this calling line
        caller = inspect.getframeinfo(inspect.stack()[1][0])
        fig_key = caller.filename + str(caller.lineno)
        # Check if there already is a figure for this caller
        if fig_key in fig_dict:
            fig, fig_idx = fig_dict[fig_key]
            plt.figure(fig.number)
        else:
            fig = plt.figure(figsize=(15, 15))
            fig.suptitle(title, fontsize=16, y=0.9)
            fig_idx = 0
        if lines is not None:
            plt_image = np.copy(image)  # Create a copy that can be modified before imshow
            draw_lines(plt_image, lines, color=[255, 0, 0], thickness=2)
        else:
            # Image will not be alternated so just reference input
            plt_image = image
        # Advance to the next subplot cell for this caller's figure.
        fig_idx += 1
        ax = plt.subplot(fig_rows, fig_cols, fig_idx)
        ax.axis('off')
        ax.set_title(demo_names[fig_idx - 1])
        ax.imshow(plt_image, cmap=cmap)
        fig_dict[fig_key] = (fig, fig_idx)
        if fig_idx == num_demo_images:
            # Clean up the dictionary mainly to make it possible to run the same demo twice,
            # without having to re-run this demo setup cell.
            del fig_dict[fig_key]
            if not fig_dict:
                # All figures are ready, lets show'em
                plt.show()
The result is still far from perfect. Enabling the debug prints reveals more information about some of the problems.
# Global diagnostics switch and the current video frame counter
# (frame_nr is referenced by the debug printouts further down).
debug_print_active = False
frame_nr = 0


def debug_print(text):
    """Print `text` only while the global debug_print_active flag is set."""
    if not debug_print_active:
        return
    print(text)
Functions provided by Udacity; I only kept the ones that I ended up using.
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    Draw every line segment in `lines` onto `img` in-place.

    `lines` is in cv2.HoughLinesP format: an iterable of [[x1, y1, x2, y2]]
    rows. (The mutable default for `color` is kept for interface
    compatibility; it is never mutated here.)
    """
    for segment in lines:
        for coords in segment:
            start_point = (coords[0], coords[1])
            end_point = (coords[2], coords[3])
            cv2.line(img, start_point, end_point, color, thickness)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    Works for both single- and multi-channel input images.
    """
    # Start from an all-black mask of the same shape as the input.
    mask = np.zeros_like(img)
    # Pick a fill value that matches the channel count of the input image.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]  # i.e. 3 or 4 channels
    else:
        fill_color = 255
    # Paint the polygon interior with the fill color...
    cv2.fillPoly(mask, vertices, fill_color)
    # ...then keep only the image pixels where the mask is nonzero.
    return cv2.bitwise_and(img, mask)
The region of interest is used to mask out areas of the image where lane lines are not expected. It is just a constant area for now, but it would probably be a good idea to make it dynamic in some way.
# Common definition of ROI vertices, used both for color and edge based line detections.
# The challenge video does however show some parts of the engine hood, which is why it needs a different ROI.
class Region_of_interest:
    """
    Selectable region-of-interest polygons stored in relative coordinates
    (x and y as fractions in [0, 1]) so they scale to any image size.

    Two variants exist: 'normal', and 'challenge' for the video where parts
    of the engine hood are visible at the bottom of the frame.
    """

    def __init__(self):
        # Bug fix: dtype was np.float, an alias removed in NumPy 1.24 —
        # np.float64 is the equivalent concrete type.
        self.roi_types = {
            'normal':
                np.array([[(0.04, 1.),
                           (0.44, 0.61),
                           (0.56, 0.61),
                           (0.96, 1.)]],
                         dtype=np.float64),
            'challenge':
                np.array([[(0.14, 0.91),
                           (0.44, 0.61),
                           (0.56, 0.61),
                           (0.86, 0.91)]],
                         dtype=np.float64)
        }
        self.roi_key = 'normal'  # Use normal ROI as default

    def set_roi(self, name='normal'):
        """Select which ROI polygon to use ('normal' or 'challenge')."""
        self.roi_key = name

    def get_vertices(self, image):
        """Return the active ROI polygon scaled to `image`'s size, as int pixels."""
        # image.shape[1::-1] is (width, height), matching the (x, y) vertex order.
        return np.round(self.roi_types[self.roi_key] * image.shape[1::-1]).astype(int)

    # Provide the ROI vertices as lines (for drawing).
    def get_lines(self, image):
        """Return the ROI polygon as a closed sequence of line segments."""
        vertices = self.get_vertices(image)
        lines = []
        # Start from the last vertex so the polygon gets closed.
        prev_point = vertices[0][-1]
        for current_point in vertices[0]:
            line = [prev_point[0], prev_point[1], current_point[0], current_point[1]]
            lines.append(line)
            prev_point = current_point
        return np.array([lines])
# Global ROI instance shared by the whole pipeline.
roi = Region_of_interest()
# Preview every ROI variant overlaid on each demo image.
for roi_name in roi.roi_types:
    roi.set_roi(name=roi_name)
    for demo_image in demo_images:
        demo_show(demo_image, title=f"{roi_name} ROI", lines=roi.get_lines(demo_image))
# Restore the default ROI for the processing below.
roi.set_roi(name='normal')
Use the Canny edge detector and the Hough transform to detect raw line segments.
def find_lines_by_edges(image):
    """
    Detect raw line segments via Canny edge detection + Hough transform.

    The image is converted to gray scale, blurred, edge detected, masked by
    the ROI, and finally run through the probabilistic Hough transform.

    image -- RGB input image
    Returns segments in cv2.HoughLinesP format (None when nothing is found).
    """
    demo_show(image, title='RGB input')
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    demo_show(gray, title='Gray scale', cmap='gray')
    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)
    demo_show(blur_gray, title='GaussianBlur', cmap='gray')
    # Define our parameters for Canny and apply
    low_threshold = 50
    high_threshold = 150
    edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
    demo_show(edges, title='Canny edge', cmap='gray')
    # Apply ROI masking
    masked_edges = region_of_interest(edges, roi.get_vertices(edges))
    demo_show(masked_edges, title='ROI masked', cmap='gray')
    # Define the Hough transform parameters
    rho = 2  # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = 50  # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 100  # minimum number of pixels making up a line
    max_line_gap = 160  # maximum gap in pixels between connectible line segments
    lines = cv2.HoughLinesP(masked_edges, rho=rho, theta=theta, threshold=threshold,
                            minLineLength=min_line_length, maxLineGap=max_line_gap)
    demo_show(image, title='Raw hough line segments', lines=lines)
    return lines
# Show the steps in the edge based raw line segments extraction
demo_edge_lines = [find_lines_by_edges(demo_image) for demo_image in demo_images]
Color thresholding is used to detect white and yellow foreground objects; some morphological operations then reduce noise and produce a skeletonized representation of the foreground objects. Finally, the Hough transform is used to extract line segments.
def color_threshold_yellow(hsv_image):
    """Binary mask of the pixels within the HSV range treated as lane-line yellow."""
    lower = np.array([20, 70, 120])
    upper = np.array([25, 255, 245])
    return cv2.inRange(hsv_image, lower, upper)
def color_threshold_white(hsv_image):
    """Binary mask of the pixels within the HSV range treated as lane-line white."""
    lower = np.array([0, 0, 240])
    upper = np.array([255, 255, 255])
    return cv2.inRange(hsv_image, lower, upper)
# Use morphological operations to remove some noise from a binary image
def remove_noise(binary_image):
    """
    Close small holes in the foreground objects of `binary_image`.

    Uses morphological closing with a 5x5 elliptical kernel. Morphological
    opening (to drop small foreground specks) was evaluated too, but it gave
    bad results on the lane lines, so only closing is applied.
    """
    ellipse_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    return cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, ellipse_kernel)
# Use morphological operations to reduce foreground objects until only the skeleton remains
def skeletonize(binary_image):
    """
    Morphological skeletonization of a binary image.

    Repeatedly erodes the image, each round collecting the pixels that a
    morphological opening would remove; their union approximates the
    one-pixel-wide skeleton. Stops when the image is fully eroded.
    """
    size = np.size(binary_image)  # Total pixel count, used for the stop condition
    skel_image = np.zeros(binary_image.shape, np.uint8)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    while True:
        eroded = cv2.erode(binary_image, kernel)
        temp = cv2.dilate(eroded, kernel)
        # Pixels removed by the opening above are part of the skeleton.
        temp = cv2.subtract(binary_image, temp)
        skel_image = cv2.bitwise_or(skel_image, temp)
        binary_image = eroded.copy()
        # Repeat until the binary image operations above have no more effect
        zeros = size - cv2.countNonZero(binary_image)
        if zeros == size:
            break
    return skel_image
def find_lines_by_colors(rgb_image):
    """
    Detect raw line segments by color thresholding for yellow and white.

    The RGB input is converted to HSV, thresholded for yellow and white,
    ROI-masked, morphologically cleaned and skeletonized, and finally run
    through the probabilistic Hough transform.

    rgb_image -- RGB input image
    Returns segments in cv2.HoughLinesP format (None when nothing is found).
    """
    demo_show(rgb_image, title='RGB input')
    # Convert the RGB image to HSV to make color thresholding a little bit more robust, or at least easier.
    hsv_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)
    demo_show(hsv_image, title='HSV, displayed using RGB cmap')
    # Threshold the image for yellow and white
    yellow_mask = color_threshold_yellow(hsv_image)
    demo_show(yellow_mask, title='Yellow threshold mask', cmap='gray')
    white_mask = color_threshold_white(hsv_image)
    demo_show(white_mask, title='White threshold mask', cmap='gray')
    # Combine the yellow and white binary images
    lines_mask = cv2.bitwise_or(yellow_mask, white_mask)
    demo_show(lines_mask, title='Yellow + white mask', cmap='gray')
    # Clear everything but region of interest
    roi_lines_mask = region_of_interest(lines_mask, roi.get_vertices(lines_mask))
    demo_show(roi_lines_mask, title='ROI masked', cmap='gray')
    # Remove some holes in detected lines
    filered_mask = remove_noise(roi_lines_mask)
    demo_show(filered_mask, title='Morphological closed', cmap='gray')
    # Reduce detected lines into just a skeleton
    skeleton_image = skeletonize(filered_mask)
    demo_show(skeleton_image, title='Skeletonized', cmap='gray')
    # Define the Hough transform parameters
    rho = 2  # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = 50  # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 100  # minimum number of pixels making up a line
    max_line_gap = 160  # maximum gap in pixels between connectible line segments
    lines = cv2.HoughLinesP(skeleton_image, rho=rho, theta=theta, threshold=threshold,
                            minLineLength=min_line_length, maxLineGap=max_line_gap)
    demo_show(rgb_image, title='Raw hough line segments', lines=lines)
    return lines
# Show the steps in the color based raw line segments extraction
demo_color_lines = [find_lines_by_colors(demo_image) for demo_image in demo_images]
Look at each raw line segment's slope and y-axis intercept to figure out which segments belong together.
class LinesData:
    """
    Hough line segments together with derived per-segment attributes.

    `lines` is expected in cv2.HoughLinesP format: shape (N, 1, 4), each row
    holding [x1, y1, x2, y2]. Slopes, y-axis intercepts and segment lengths
    are derived with vectorized NumPy operations (replacing the original
    per-segment Python loops) unless precomputed arrays are supplied.

    Note: a perfectly vertical segment (x1 == x2) yields +/-inf slope with a
    NumPy divide warning, matching the original element-wise division.
    """

    def __init__(self, lines, slopes=None, intercepts=None, lengths=None):
        """
        lines -- segment array, shape (N, 1, 4)
        slopes, intercepts, lengths -- optional precomputed per-segment
            arrays; any omitted attribute is calculated from `lines`.
        """
        self.lines = lines
        if slopes is not None:
            self.slopes = slopes
        else:
            self.__calc_slopes()
        if intercepts is not None:
            self.intercepts = intercepts
        else:
            self.__calc_intercepts()
        if lengths is not None:
            self.lengths = lengths
        else:
            self.__calc_lengths()

    def __segment_points(self):
        # Flatten (N, 1, 4) -> (N, 4) float rows of [x1, y1, x2, y2].
        return np.asarray(self.lines, dtype=np.float64).reshape(-1, 4)

    def __calc_slopes(self):
        # Vectorized dy/dx for every segment.
        pts = self.__segment_points()
        self.slopes = (pts[:, 3] - pts[:, 1]) / (pts[:, 2] - pts[:, 0])

    def __calc_intercepts(self):
        # y-axis intercept from y = slope * x + intercept, using (x1, y1).
        pts = self.__segment_points()
        self.intercepts = pts[:, 1] - self.slopes * pts[:, 0]

    def __calc_lengths(self):
        # Euclidean length of each segment.
        pts = self.__segment_points()
        self.lengths = np.sqrt((pts[:, 3] - pts[:, 1]) ** 2 + (pts[:, 2] - pts[:, 0]) ** 2)

    def get_coords(self):
        """
        Get the lines as coordinate arrays: (x1s, y1s, x2s, y2s).
        """
        pts = np.asarray(self.lines).reshape(-1, 4)
        # Copy so later reorder()/mutation of self.lines can't alias the result.
        return pts[:, 0].copy(), pts[:, 1].copy(), pts[:, 2].copy(), pts[:, 3].copy()

    def reorder(self, new_indices):
        """Reorder all per-segment arrays in place according to `new_indices`."""
        self.lines = self.lines[new_indices]
        self.slopes = self.slopes[new_indices]
        self.intercepts = self.intercepts[new_indices]
        self.lengths = self.lengths[new_indices]
def calc_most_frequent_ranges(data, data_weigths, num_bins, total_range):
    """
    Calculates a sorted list of data ranges that are the most frequently represented.

    data         -- 1D array of samples
    data_weigths -- per-sample weights for the histogram
    num_bins     -- number of histogram bins spanning total_range
    total_range  -- (min, max) range covered by the histogram

    Returns a list of (lower, upper) ranges ordered from most to least
    frequent; empty bins are excluded.
    """
    hist, bin_edges = np.histogram(data, bins=num_bins, range=total_range,
                                   weights=data_weigths)
    sorted_hist_indices = np.argsort(hist)[::-1]  # descending order
    sorted_ranges = []
    for idx in sorted_hist_indices:
        if hist[idx] == 0:
            # All remaining ranges are empty
            break
        # If possible, widen the range with the neighboring bins. This keeps
        # individual bins small while still grouping values that straddle a
        # bin edge.
        num_lower_neighbors = 1 if idx > 0 else 0
        # Bug fix: the last valid bin index is len(hist) - 1, so an upper
        # neighbor only exists when idx < len(hist) - 1. The previous bound
        # (len(bin_edges) - 1) was always satisfied and made bin_edges[idx + 2]
        # index past the end whenever the LAST bin was the most frequent.
        num_upper_neighbors = 1 if idx < (len(hist) - 1) else 0
        sorted_ranges.append((bin_edges[idx - num_lower_neighbors],
                              bin_edges[idx + 1 + num_upper_neighbors]))
    return sorted_ranges
def find_similar_lines(lines_data):
    """
    Group together lines that have similar slope and y-axis intercept. This by first finding the two slope
    ranges that seem to be most frequent. The assumption is that they belong to the left and right lane
    lines. The lengths of the lines are used as weights so that noisy short detections count less.

    lines_data -- LinesData instance holding all raw line segments
    Returns a list of at most `num_lines_to_find` LinesData groups.
    """
    slope_ranges = calc_most_frequent_ranges(data=lines_data.slopes, data_weigths=lines_data.lengths,
                                             num_bins=50, total_range=(-5.0, 5.0))
    num_lines_to_find = 2
    similar_lines = []
    for line_count, slope_range in enumerate(slope_ranges):
        # Only extract the most significant groups of lines. (Fix: the limit
        # was previously duplicated here as a magic number instead of using
        # num_lines_to_find.)
        if line_count == num_lines_to_find:
            break
        # Gather all lines with expected slopes
        slope_group_idxs = np.where(np.logical_and(lines_data.slopes >= slope_range[0],
                                                   lines_data.slopes < slope_range[1]))[0]
        # Among those, find the most common y-axis intercept ranges.
        intercept_ranges = calc_most_frequent_ranges(data=lines_data.intercepts[slope_group_idxs],
                                                     data_weigths=lines_data.lengths[slope_group_idxs],
                                                     num_bins=40, total_range=(-2000, 2000))
        # For now, ignore all but the most frequent intercept range
        intercept_range = intercept_ranges[0]
        result_group_idxs = slope_group_idxs[np.where(np.logical_and(
            lines_data.intercepts[slope_group_idxs] >= intercept_range[0],
            lines_data.intercepts[slope_group_idxs] <= intercept_range[1]))]
        # Reuse the already computed attributes for the selected subset.
        similar_lines.append(LinesData(lines=lines_data.lines[result_group_idxs],
                                       slopes=lines_data.slopes[result_group_idxs],
                                       intercepts=lines_data.intercepts[result_group_idxs],
                                       lengths=lines_data.lengths[result_group_idxs]))
    return similar_lines
# Show the steps in the grouping of the detected raw line segments.
# One scatter subplot (slope vs intercept) per demo image, plus an image
# overlay where the grouped segments are recolored.
scatter_fig = plt.figure(figsize=(15, 15))
scatter_fig.suptitle("Grouped line segments", fontsize=16)
scatter_fig_idx = 0
demo_similar_lines_lists = []
for image, edge_lines, color_lines in zip(demo_images, demo_edge_lines, demo_color_lines):
    # Combine the output from the raw line extractions above.
    raw_line_segments = np.concatenate((edge_lines, color_lines), axis=0)
    # Calculate slope and y-axis interception.
    lines_data = LinesData(raw_line_segments)
    # Prepare the scatter subplot for this demo image.
    scatter_fig_idx += 1
    plt.figure(scatter_fig.number)
    ax = plt.subplot(fig_rows, fig_cols, scatter_fig_idx)
    ax.set_title(demo_names[scatter_fig_idx - 1])
    ax.set_xlabel("slope")
    ax.set_ylabel("intercept")
    plt_image = np.copy(image)  # Create a copy that can be modified before imshow
    # Draw all lines and points in red. Some of them will be drawn in other colors below.
    draw_lines(plt_image, lines_data.lines, color=[255, 0, 0], thickness=2)
    c = ax.scatter(lines_data.slopes, lines_data.intercepts, c='r')
    legend_data = (c,)
    # Group similar lines.
    similar_lines_list = find_similar_lines(lines_data)
    # Show the result: one color per detected group, in both scatter and image.
    colors_char = ['g', 'b']
    colors_rgb = [[0, 255, 0], [0, 0, 255]]
    for color_idx, similar_lines in enumerate(similar_lines_list):
        c = ax.scatter(similar_lines.slopes, similar_lines.intercepts, c=colors_char[color_idx])
        legend_data += (c,)
        draw_lines(plt_image, similar_lines.lines, color=colors_rgb[color_idx], thickness=2)
    scatter_fig.legend(legend_data, ('Other lines', 'Grouped lines 1', 'Grouped lines 2'), 'upper left', fontsize=12)
    demo_show(plt_image, title="Grouped line segments")
    demo_similar_lines_lists.append(similar_lines_list)  # Save result for later demo
Calculate the average of the line segments for the two most significant lines in this frame, then calculate the average over historical values, including the one for this frame. Historical values are used to achieve more stable output.
def calc_frame_average_line(lines_data):
    """
    Fold a group of similar lines into one weighted-average line.

    Starts with the line closest to the camera (largest bottom y-coordinate),
    assuming it is the most accurate, then folds in the remaining lines in
    that order while skipping lines that deviate too much from the running
    average. Line lengths are used as weights.

    lines_data -- LinesData group (NOTE: reordered in place by this call)
    Returns (average_slope, average_intercept).
    """
    # Start with line closest to camera (largest y-coordinate), assuming they are the most accurate,
    # and that they at least should be included in average.
    # Sort lines by bottom y-coordinate
    _, y1_coords, _, y2_coords = lines_data.get_coords()
    bottom_y_coords = np.where(y1_coords >= y2_coords, y1_coords, y2_coords)
    sorted_indices = np.argsort(bottom_y_coords)[::-1]  # descending order
    lines_data.reorder(sorted_indices)
    max_slope_deviation = 0.1  # Allowed deviation from current average
    max_intercept_deviation = 20  # Allowed deviation from current average
    # Seed the running average with the bottom-most line...
    average_slope = lines_data.slopes[0]
    average_intercept = lines_data.intercepts[0]
    # ...but give it zero weight, so the loop's first accepted line (which is
    # the same one) is not counted twice.
    average_weight = 0  # Don't count the above initialization
    for line, slope, intercept, length in zip(lines_data.lines, lines_data.slopes,
                                              lines_data.intercepts, lines_data.lengths):
        # Only use lines that not drift away too much from current average.
        if (slope >= (average_slope - max_slope_deviation) and
                slope <= (average_slope + max_slope_deviation) and
                intercept >= (average_intercept - max_intercept_deviation) and
                intercept <= (average_intercept + max_intercept_deviation)):
            # Use line lengths as weights in this calculation to giver longer lines more substance in the average.
            average_slope = (((average_slope * average_weight) + (slope * length)) /
                             (average_weight + length))
            average_intercept = (((average_intercept * average_weight) + (intercept * length)) /
                                 (average_weight + length))
            average_weight += length
    return (average_slope, average_intercept)
class Line_History:
    """
    Rolling history of (slope, intercept) samples for one lane line.

    New samples are only accepted when similar to the current history mean;
    after too many consecutive rejections the oldest entries are dropped so
    the history can re-converge on the line's new position.
    """
    # Some defines
    HISTORY_LENGTH = 9
    # The line will not be added if deviating more than this from current mean.
    SLOPE_TOLERANCE = 0.2
    INTERCEPT_TOLERANCE = 30.
    # The history will start to be reduced if more than this number of add() fail.
    # Note that there will be add attempts for each detected line.
    MAX_FAILED_ADD_ATTEMPTS = 6

    def __init__(self, initial_slope_range, name='Unknown'):
        # Slope range used to accept the very first sample (e.g. negative
        # slopes for the left lane line, positive for the right).
        self.initial_slope_range = initial_slope_range
        self.slopes = []
        self.intercepts = []
        self.add_attempts_since_last_success = 0
        self.name = name  # Only used in debug printouts

    def __is_similar(self, slope, intercept):
        """
        Check if slope and interception is similar to what's already in this history.
        """
        # Initial limits
        if len(self.slopes) == 0:
            if ((slope > self.initial_slope_range[0]) and
                    (slope < self.initial_slope_range[1])):
                return True
            return False
        # Otherwise compare against the current history mean.
        mean_slope = np.mean(self.slopes)
        mean_intercept = np.mean(self.intercepts)
        if ((slope > mean_slope - Line_History.SLOPE_TOLERANCE) and
                (slope < mean_slope + Line_History.SLOPE_TOLERANCE) and
                (intercept > mean_intercept - Line_History.INTERCEPT_TOLERANCE) and
                (intercept < mean_intercept + Line_History.INTERCEPT_TOLERANCE)):
            return True
        else:
            return False

    def __remove_oldest(self):
        # Drop the oldest (slope, intercept) pair, if any.
        if len(self.slopes) > 0:
            del self.slopes[0]
            del self.intercepts[0]

    def add(self, slope, intercept):
        """Add a sample if it fits the history; otherwise age the history."""
        if not self.__is_similar(slope, intercept):
            self.add_attempts_since_last_success += 1
            if self.add_attempts_since_last_success > Line_History.MAX_FAILED_ADD_ATTEMPTS:
                # Remove oldest values until new lines fit again.
                self.__remove_oldest()
            return
        # The new line seems to belong here.
        self.add_attempts_since_last_success = 0
        self.slopes.append(slope)
        self.intercepts.append(intercept)
        # Limit the history length
        if len(self.slopes) > Line_History.HISTORY_LENGTH:
            self.__remove_oldest()

    def get_average_line(self, image):
        """
        Returns None if history is empty, otherwise the average line.

        The mean (slope, intercept) is converted to end points spanning from
        the bottom of `image` to the top of the ROI, in HoughLinesP-like
        format. If an end point falls outside the image the history is
        considered corrupt and is cleared.
        """
        if len(self.slopes) == 0:
            debug_print(f"Frame nr:{frame_nr} {self.name}: No line")
            return None
        average_slope = np.mean(self.slopes)
        average_intercept = np.mean(self.intercepts)
        # Calculate coordinates for the average line within ROI y-range
        roi_vertices = roi.get_vertices(image)
        top_y = np.amin(roi_vertices, axis=1)[0][1]
        bottom_y = image.shape[0] - 1  # np.amax(roi_vertices, axis=1)[0][1]
        # Invert y = slope * x + intercept to get x at the two y levels.
        top_x = (top_y - average_intercept) / average_slope
        bottom_x = (bottom_y - average_intercept) / average_slope
        # Sanity check
        x_min = 0
        x_max = image.shape[1] - 1
        if ((bottom_x < x_min) or
                (bottom_x > x_max) or
                (top_x < x_min) or
                (top_x > x_max)):
            # The line seems to be corrupt, reset history.
            debug_print(f"Frame nr:{frame_nr} {self.name}: Corrupt line")
            self.clear_history()
            return None
        else:
            return np.round([[[bottom_x, bottom_y, top_x, top_y]]]).astype(int)

    def clear_history(self):
        """Forget all samples and the failed-add counter."""
        self.slopes = []
        self.intercepts = []
        self.add_attempts_since_last_success = 0
# For now, only keep track of what's believed to be left and right lane line.
# ToDo: keep track of all significant lines and continuously (for each frame)
# evaluate which lines are the most likely lane lines.
history_lines = {
    'left': Line_History(initial_slope_range=(-math.inf, 0.), name="left"),
    'right': Line_History(initial_slope_range=(0., math.inf), name="right")
}


def clear_history_lines():
    """Reset the history of both lane lines (e.g. between videos)."""
    # Idiom fix: iterate values directly instead of discarding keys from items().
    for history_line in history_lines.values():
        history_line.clear_history()


def add_line_to_history(slope, intercept):
    """Offer a line to every history; non-fitting histories silently discard it."""
    for history_line in history_lines.values():
        history_line.add(slope, intercept)
def calc_lane_lines(raw_line_segments, image):
    """
    Turn raw Hough segments into smoothed lane lines for one frame.

    Groups similar segments, folds each group into a frame-average line,
    feeds those into the per-lane histories, and returns whatever
    history-averaged lines are currently available.
    """
    all_lines_data = LinesData(raw_line_segments)
    for similar_lines in find_similar_lines(all_lines_data):
        frame_slope, frame_intercept = calc_frame_average_line(similar_lines)
        add_line_to_history(frame_slope, frame_intercept)
    # Collect available lines. For now: don't care which line is left or right.
    lane_lines = []
    for history_line in history_lines.values():
        lane_line = history_line.get_average_line(image)
        # Still get some missing detections, so None results are skipped.
        if lane_line is not None:
            lane_lines.append(lane_line)
    return lane_lines
# Demo: frame-average + history-average lines on each individual sample image.
for image, similar_lines_list in zip(demo_images, demo_similar_lines_lists):
    # Processing individual image samples, so the history makes no sense
    clear_history_lines()
    plt_image = np.copy(image)  # Create a copy that can be modified before imshow
    overlay = np.copy(image)
    for similar_lines in similar_lines_list:
        frame_average_slope, frame_average_intercept = calc_frame_average_line(similar_lines)
        add_line_to_history(frame_average_slope, frame_average_intercept)
    for history_line in history_lines.values():
        average_line = history_line.get_average_line(image)
        # Bug fix: get_average_line() returns None when no line is available
        # (empty history or corrupt line); draw_lines() would crash trying to
        # iterate None.
        if average_line is not None:
            draw_lines(overlay, average_line, color=[255, 0, 0], thickness=10)
    # Blend the drawn lines semi-transparently onto the displayed image.
    alpha = 0.4
    cv2.addWeighted(overlay, alpha, plt_image, 1 - alpha, 0, plt_image)
    demo_show(plt_image, title="Average lines")
def process_image(image):
    """
    Process one video frame: detect lane lines and draw them on the frame.

    Combines the edge-based and color-based raw line detections, feeds them
    into the lane-line calculation (which smooths using the history from
    previous frames), and alpha-blends the lane lines plus the ROI outline
    onto a copy of the input frame.

    image -- RGB video frame
    Returns the annotated RGB frame.
    """
    global frame_nr
    frame_nr += 1
    all_lines = None
    result = np.copy(image)
    overlay = np.copy(image)
    # Gather raw segments from both detectors; either may return None.
    edge_lines = find_lines_by_edges(image)
    if edge_lines is not None:
        all_lines = edge_lines
    color_lines = find_lines_by_colors(image)
    if color_lines is not None:
        if all_lines is not None:
            all_lines = np.concatenate((all_lines, color_lines), axis=0)
        else:
            all_lines = color_lines
    if all_lines is not None:
        lane_lines = calc_lane_lines(all_lines, image)
    else:
        # Bug fix: if both detectors return None for a frame, all_lines stays
        # None and calc_lane_lines() would crash iterating it. Draw no lane
        # lines for such frames instead.
        lane_lines = []
    for line in lane_lines:
        draw_lines(overlay, line, color=[255, 0, 0], thickness=10)
    # Add ROI to overlay
    draw_lines(overlay, lines=roi.get_lines(overlay), color=[0, 0, 255], thickness=1)
    # Draw overlay on resulting image with alpha blending
    alpha = 0.4
    cv2.addWeighted(overlay, alpha, result, 1 - alpha, 0, result)
    return result
def reset_image_processing_pipeline(roi_name='normal'):
    """
    Prepare the processing pipeline for a new video.

    Resets the frame counter and the lane-line histories, and selects which
    region-of-interest polygon to use ('normal' or 'challenge').
    """
    global frame_nr
    frame_nr = 0
    clear_history_lines()
    roi.set_roi(name=roi_name)
def process_video_files():
    """
    Run the lane-detection pipeline on all test videos.

    Each video gets a freshly reset pipeline. The challenge video uses its
    own ROI since parts of the engine hood are visible in its frames.
    (Refactor: the previously triplicated per-video statements are replaced
    by one data-driven loop; processing order and output paths unchanged.)
    """
    videos = [
        ('solidWhiteRight.mp4', 'normal'),
        ('solidYellowLeft.mp4', 'normal'),
        ('challenge.mp4', 'challenge'),
    ]
    for file_name, roi_name in videos:
        reset_image_processing_pipeline(roi_name=roi_name)
        clip = VideoFileClip("test_videos/" + file_name).fl_image(process_image)
        clip.write_videofile('test_videos_output/' + file_name, audio=False)
# Disable demo_mode before processing the videos.
# (demo_show() checks this flag, so no demo figures are produced during export.)
demo_mode = False
process_video_files()
# Inline HTML5 players for the three processed videos. These are notebook
# cell expressions — presumably each HTML(...) was the last expression of its
# own cell, which is what makes it render; confirm against the original .ipynb.
from IPython.display import HTML
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format('test_videos_output/solidWhiteRight.mp4'))
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format('test_videos_output/solidYellowLeft.mp4'))
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format('test_videos_output/challenge.mp4'))